[IA64] get rid of sync_split_cache
author awilliam@xenbuild.aw <awilliam@xenbuild.aw>
Fri, 14 Apr 2006 20:20:04 +0000 (14:20 -0600)
committer awilliam@xenbuild.aw <awilliam@xenbuild.aw>
Fri, 14 Apr 2006 20:20:04 +0000 (14:20 -0600)
Get rid of sync_split_cache.
Use flush_icache_range and ia64_fc instead.

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
xen/arch/ia64/xen/domain.c
xen/arch/ia64/xen/privop.c
xen/arch/ia64/xen/xenmisc.c

index 8c996c722cf9d6472ceefb57cf447e079563dee6..93a10695bc1af5f5401b17e2a56a3c685ac3ca04 100644 (file)
@@ -339,8 +339,9 @@ int arch_set_info_guest(struct vcpu *v, struct vcpu_guest_context *c)
            d->arch.cmdline      = c->cmdline;
            d->shared_info->arch = c->shared;
 
-           /* FIXME: it is required here ?  */
-           sync_split_caches();
+           /* Cache synchronization seems to be done by the linux kernel
+              during mmap/unmap operation.  However be conservative.  */
+           domain_cache_flush (d, 1);
        }
        new_thread(v, regs->cr_iip, 0, 0);
 
@@ -784,50 +785,68 @@ static void loaddomainelfimage(struct domain *d, unsigned long image_start)
   
        copy_memory(&ehdr, (void *) image_start, sizeof(Elf_Ehdr));
        for ( h = 0; h < ehdr.e_phnum; h++ ) {
-               copy_memory(&phdr,elfbase + ehdr.e_phoff + (h*ehdr.e_phentsize),
-               sizeof(Elf_Phdr));
-           //if ( !is_loadable_phdr(phdr) )
-           if ((phdr.p_type != PT_LOAD)) {
-               continue;
-       }
-       filesz = phdr.p_filesz; memsz = phdr.p_memsz;
-       elfaddr = (unsigned long) elfbase + phdr.p_offset;
-       dom_mpaddr = phdr.p_paddr;
+               copy_memory(&phdr,
+                           elfbase + ehdr.e_phoff + (h*ehdr.e_phentsize),
+                           sizeof(Elf_Phdr));
+               if ((phdr.p_type != PT_LOAD))
+                   continue;
+
+               filesz = phdr.p_filesz;
+               memsz = phdr.p_memsz;
+               elfaddr = (unsigned long) elfbase + phdr.p_offset;
+               dom_mpaddr = phdr.p_paddr;
+
 //printf("p_offset: %x, size=%x\n",elfaddr,filesz);
 #ifdef CONFIG_DOMAIN0_CONTIGUOUS
-       if (d == dom0) {
-               if (dom_mpaddr+memsz>dom0_size || dom_mpaddr+filesz>dom0_size) {
-                       printf("Domain0 doesn't fit in allocated space!\n");
-                       while(1);
-               }
-               dom_imva = (unsigned long) __va(dom_mpaddr + dom0_start);
-               copy_memory((void *) dom_imva, (void *) elfaddr, filesz);
-               if (memsz > filesz) memset((void *) dom_imva+filesz, 0, memsz-filesz);
+               if (d == dom0) {
+                       if (dom_mpaddr+memsz>dom0_size)
+                               panic("Dom0 doesn't fit in memory space!\n");
+                       dom_imva = __va_ul(dom_mpaddr + dom0_start);
+                       copy_memory((void *)dom_imva, (void *)elfaddr, filesz);
+                       if (memsz > filesz)
+                               memset((void *)dom_imva+filesz, 0,
+                                      memsz-filesz);
 //FIXME: This test for code seems to find a lot more than objdump -x does
-               if (phdr.p_flags & PF_X) privify_memory(dom_imva,filesz);
-       }
-       else
-#endif
-       while (memsz > 0) {
-               p = assign_new_domain_page(d,dom_mpaddr);
-               if (unlikely(!p)) BUG();
-               dom_imva = (unsigned long) __va(page_to_maddr(p));
-               if (filesz > 0) {
-                       if (filesz >= PAGE_SIZE)
-                               copy_memory((void *) dom_imva, (void *) elfaddr, PAGE_SIZE);
-                       else { // copy partial page, zero the rest of page
-                               copy_memory((void *) dom_imva, (void *) elfaddr, filesz);
-                               memset((void *) dom_imva+filesz, 0, PAGE_SIZE-filesz);
+                       if (phdr.p_flags & PF_X) {
+                               privify_memory(dom_imva,filesz);
+                               flush_icache_range (dom_imva, dom_imva+filesz);
                        }
+               }
+               else
+#endif
+               while (memsz > 0) {
+                       p = assign_new_domain_page(d,dom_mpaddr);
+                       BUG_ON (unlikely(p == NULL));
+                       dom_imva = __va_ul(page_to_maddr(p));
+                       if (filesz > 0) {
+                               if (filesz >= PAGE_SIZE)
+                                       copy_memory((void *) dom_imva,
+                                                   (void *) elfaddr,
+                                                   PAGE_SIZE);
+                               else {
+                                       // copy partial page
+                                       copy_memory((void *) dom_imva,
+                                                   (void *) elfaddr, filesz);
+                                       // zero the rest of page
+                                       memset((void *) dom_imva+filesz, 0,
+                                              PAGE_SIZE-filesz);
+                               }
 //FIXME: This test for code seems to find a lot more than objdump -x does
-                       if (phdr.p_flags & PF_X)
-                               privify_memory(dom_imva,PAGE_SIZE);
+                               if (phdr.p_flags & PF_X) {
+                                       privify_memory(dom_imva,PAGE_SIZE);
+                                       flush_icache_range(dom_imva,
+                                                          dom_imva+PAGE_SIZE);
+                               }
+                       }
+                       else if (memsz > 0) {
+                                /* always zero out entire page */
+                               memset((void *) dom_imva, 0, PAGE_SIZE);
+                       }
+                       memsz -= PAGE_SIZE;
+                       filesz -= PAGE_SIZE;
+                       elfaddr += PAGE_SIZE;
+                       dom_mpaddr += PAGE_SIZE;
                }
-               else if (memsz > 0) // always zero out entire page
-                       memset((void *) dom_imva, 0, PAGE_SIZE);
-               memsz -= PAGE_SIZE; filesz -= PAGE_SIZE;
-               elfaddr += PAGE_SIZE; dom_mpaddr += PAGE_SIZE;
-       }
        }
 }
 
@@ -1086,7 +1105,6 @@ int construct_dom0(struct domain *d,
 
        new_thread(v, pkern_entry, 0, 0);
        physdev_init_dom0(d);
-       sync_split_caches();
 
        // FIXME: Hack for keyboard input
        //serial_input_init();
index e3feb2796644b3bb0c8aff000f6b9554b59d79f9..438d50c05937024d3438cbbe9d3749fe9e707587 100644 (file)
@@ -60,7 +60,9 @@ void build_hypercall_bundle(UINT64 *imva, UINT64 brkimm, UINT64 hypnum, UINT64 r
        bundle.slot0 = slot0.inst; bundle.slot2 = slot2.inst;
        bundle.slot1a = slot1.inst; bundle.slot1b = slot1.inst >> 18;
        
-       *imva++ = bundle.i64[0]; *imva = bundle.i64[1];
+       imva[0] = bundle.i64[0]; imva[1] = bundle.i64[1];
+       ia64_fc (imva);
+       ia64_fc (imva + 1);
 }
 
 void build_pal_hypercall_bundles(UINT64 *imva, UINT64 brkimm, UINT64 hypnum)
@@ -83,6 +85,8 @@ void build_pal_hypercall_bundles(UINT64 *imva, UINT64 brkimm, UINT64 hypnum)
        bundle.slot0 = slot_a5.inst;
        imva[0] = bundle.i64[0];
        imva[1] = bundle.i64[1];
+       ia64_fc (imva);
+       ia64_fc (imva + 1);
        
        /* Copy the second bundle and patch the hypercall vector.  */
        bundle.i64[0] = pal_call_stub[2];
@@ -93,6 +97,8 @@ void build_pal_hypercall_bundles(UINT64 *imva, UINT64 brkimm, UINT64 hypnum)
        bundle.slot0 = slot_m37.inst;
        imva[2] = bundle.i64[0];
        imva[3] = bundle.i64[1];
+       ia64_fc (imva + 2);
+       ia64_fc (imva + 3);
 }
 
 
index 915ec038cc07f43a4e7dd5c51f7d229c4907b5b6..ac90d322fc47911c26762b2318b84df27d54ce58 100644 (file)
@@ -363,26 +363,6 @@ void panic_domain(struct pt_regs *regs, const char *fmt, ...)
        domain_crash_synchronous ();
 }
 
-/* FIXME: for the forseeable future, all cpu's that enable VTi have split
- *  caches and all cpu's that have split caches enable VTi.  This may
- *  eventually be untrue though. */
-#define cpu_has_split_cache    vmx_enabled
-extern unsigned int vmx_enabled;
-
-void sync_split_caches(void)
-{
-       unsigned long ret, progress = 0;
-
-       if (cpu_has_split_cache) {
-               /* Sync d/i cache conservatively */
-               ret = ia64_pal_cache_flush(4, 0, &progress, NULL);
-               if ((ret!=PAL_STATUS_SUCCESS)&& (ret!=PAL_STATUS_UNIMPLEMENTED))
-                       printk("PAL CACHE FLUSH failed\n");
-               else printk("Sync i/d cache for guest SUCC\n");
-       }
-       else printk("sync_split_caches ignored for CPU with no split cache\n");
-}
-
 ///////////////////////////////
 // from arch/x86/mm.c
 ///////////////////////////////